} \
DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
-VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
-VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
-VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
-VBD_SHOW(br_req, "%d\n", be->blkif->st_br_req);
+VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
+VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
+VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
+VBD_SHOW(br_req, "%d\n", be->blkif->st_br_req);
+VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
+VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
static struct attribute *vbdstat_attrs[] = {
&dev_attr_oo_req.attr,
&dev_attr_rd_req.attr,
&dev_attr_wr_req.attr,
&dev_attr_br_req.attr,
+ &dev_attr_rd_sect.attr,
+ &dev_attr_wr_sect.attr,
NULL
};
int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
unsigned int nseg;
- int ret, i;
+ int ret, i, nr_sects = 0;
tap_blkif_t *info;
uint64_t sector;
blkif_request_t *target;
req->seg[i].gref, blkif->domid);
op++;
}
+
+ nr_sects += (req->seg[i].last_sect -
+ req->seg[i].first_sect + 1);
}
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
target->id = usr_idx;
wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */
info->ufe_ring.req_prod_pvt++;
+
+ if (operation == READ) {
+ blkif->st_rd_sect += nr_sects;
+ } else if (operation == WRITE) {
+ blkif->st_wr_sect += nr_sects;
+ }
+
return;
fail_flush:
return 0;
}
+/****************************************************************
+ * sysfs interface for VBD I/O requests
+ */
+
+/*
+ * VBD_SHOW(name, format, args...) expands to a read-only sysfs show
+ * handler plus its DEVICE_ATTR definition (dev_attr_<name>).  The
+ * handler maps the struct device back to its xenbus_device and pulls
+ * the backend_info out of driver_data; the caller-supplied args
+ * reference that local 'be' to select the statistic to print.
+ */
+#define VBD_SHOW(name, format, args...) \
+	static ssize_t show_##name(struct device *_dev, \
+				   struct device_attribute *attr, \
+				   char *buf) \
+	{ \
+		struct xenbus_device *dev = to_xenbus_device(_dev); \
+		struct backend_info *be = dev->dev.driver_data; \
+		\
+		return sprintf(buf, format, ##args); \
+	} \
+	DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+
+/*
+ * Per-device blktap statistics.  The tap_ prefix keeps these attribute
+ * names distinct from the blkback vbdstat entries defined earlier in
+ * this patch.
+ * NOTE(review): "%d" assumes the st_* counters are plain ints — TODO
+ * confirm against the blkif definition.
+ */
+VBD_SHOW(tap_oo_req, "%d\n", be->blkif->st_oo_req);
+VBD_SHOW(tap_rd_req, "%d\n", be->blkif->st_rd_req);
+VBD_SHOW(tap_wr_req, "%d\n", be->blkif->st_wr_req);
+VBD_SHOW(tap_rd_sect, "%d\n", be->blkif->st_rd_sect);
+VBD_SHOW(tap_wr_sect, "%d\n", be->blkif->st_wr_sect);
+
+/* Attribute list for the "statistics" sysfs group; NULL-terminated as
+ * required by sysfs_create_group(). */
+static struct attribute *tapstat_attrs[] = {
+	&dev_attr_tap_oo_req.attr,
+	&dev_attr_tap_rd_req.attr,
+	&dev_attr_tap_wr_req.attr,
+	&dev_attr_tap_rd_sect.attr,
+	&dev_attr_tap_wr_sect.attr,
+	NULL
+};
+
+/* Group name "statistics" means the attributes appear under a
+ * <device>/statistics/ subdirectory in sysfs. */
+static struct attribute_group tapstat_group = {
+	.name = "statistics",
+	.attrs = tapstat_attrs,
+};
+
+/* Create the per-device statistics group under the xenbus device's
+ * kobject.  Returns 0 on success or the negative errno from
+ * sysfs_create_group(). */
+int xentap_sysfs_addif(struct xenbus_device *dev)
+{
+	return sysfs_create_group(&dev->dev.kobj, &tapstat_group);
+}
+
+/* Tear down the statistics group created by xentap_sysfs_addif(). */
+void xentap_sysfs_delif(struct xenbus_device *dev)
+{
+	sysfs_remove_group(&dev->dev.kobj, &tapstat_group);
+}
+
+/*
+ * xenbus remove callback: stop the backend kthread, detach the tapdisk
+ * process, free the blkif, drop the sysfs statistics group and finally
+ * release the backend_info.
+ */
+static int blktap_remove(struct xenbus_device *dev)
+{
+	struct backend_info *be = dev->dev.driver_data;
+
+	/* Stop watching the backend xenstore directory. */
+	if (be->backend_watch.node) {
+		unregister_xenbus_watch(&be->backend_watch);
+		kfree(be->backend_watch.node);
+		be->backend_watch.node = NULL;
+	}
+	if (be->blkif) {
+		/* Stop the I/O kthread before tearing the interface down. */
+		if (be->blkif->xenblkd)
+			kthread_stop(be->blkif->xenblkd);
+		signal_tapdisk(be->blkif->dev_num);
+		tap_blkif_free(be->blkif);
+		be->blkif = NULL;
+	}
+	/*
+	 * NOTE(review): called unconditionally, but the group is only
+	 * created later, in tap_update_blkif_status(), and that call can
+	 * fail — so remove may run against a group that was never added;
+	 * sysfs_remove_group() may warn in that case — confirm it is
+	 * benign on the target kernel.  be->dev is presumably the same
+	 * xenbus device as 'dev' — verify.
+	 */
+	xentap_sysfs_delif(be->dev);
+	kfree(be);
+	dev->dev.driver_data = NULL;
+	return 0;
+}
+
static void tap_update_blkif_status(blkif_t *blkif)
{
int err;
return;
}
+ err = xentap_sysfs_addif(blkif->be->dev);
+ if (err) {
+ xenbus_dev_fatal(blkif->be->dev, err,
+ "creating sysfs entries");
+ return;
+ }
+
blkif->xenblkd = kthread_run(tap_blkif_schedule, blkif, name);
if (IS_ERR(blkif->xenblkd)) {
err = PTR_ERR(blkif->xenblkd);
}
}
-static int blktap_remove(struct xenbus_device *dev)
-{
- struct backend_info *be = dev->dev.driver_data;
-
- if (be->backend_watch.node) {
- unregister_xenbus_watch(&be->backend_watch);
- kfree(be->backend_watch.node);
- be->backend_watch.node = NULL;
- }
- if (be->blkif) {
- if (be->blkif->xenblkd)
- kthread_stop(be->blkif->xenblkd);
- signal_tapdisk(be->blkif->dev_num);
- tap_blkif_free(be->blkif);
- be->blkif = NULL;
- }
- kfree(be);
- dev->dev.driver_data = NULL;
- return 0;
-}
-
/**
* Entry point to this code when a new device is created. Allocate
* the basic structures, and watch the store waiting for the